*eax = *ebx = *ecx = *edx = 0;
}
+void vcpu_kick(struct vcpu *v)
+{
+    /*
+     * NB1. 'pause_flags' and 'processor' must be checked /after/ update of
+     * pending flag. These values may fluctuate (after all, we hold no
+     * locks) but the key insight is that each change will cause
+     * evtchn_upcall_pending to be polled.
+     *
+     * NB2. We save the running flag across the unblock to avoid a needless
+     * IPI for domains that we IPI'd to unblock.
+     */
+    bool_t running = v->is_running;
+    vcpu_unblock(v);
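+    /*
+     * Raising a softirq (rather than sending a bare event-check IPI) makes
+     * the target CPU re-run the softirq check on its return-to-guest path;
+     * see vcpu_kick_softirq() below.
+     */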
+    if ( running && (in_irq() || (v != current)) )
+        cpu_raise_softirq(v->processor, VCPU_KICK_SOFTIRQ);
+}
+
+void vcpu_mark_events_pending(struct vcpu *v)
+{
+    int already_pending = test_and_set_bit(
+        0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending));
+
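+    /* Only the first setter of the flag needs to notify the vcpu. */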
+    if ( already_pending )
+        return;
+
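+    /* HVM vcpus take the event as a virtual IRQ; PV vcpus are kicked. */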
+    if ( is_hvm_vcpu(v) )
+        hvm_assert_evtchn_irq(v);
+    else
+        vcpu_kick(v);
+}
+
+static void vcpu_kick_softirq(void)
+{
+    /*
+     * Nothing to do here: we merely prevent notifiers from racing with checks
+     * executed on return to guest context with interrupts enabled. See, for
+     * example, xxx_intr_assist() executed on return to HVM guest context.
+     */
+}
+
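+/* Register the (no-op) VCPU-kick softirq handler during boot. */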
+static int __init init_vcpu_kick_softirq(void)
+{
+    open_softirq(VCPU_KICK_SOFTIRQ, vcpu_kick_softirq);
+    return 0;
+}
+__initcall(init_vcpu_kick_softirq);
+
+
/*
* Local variables:
* mode: C
#endif
ENTRY(svm_asm_do_resume)
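+        /*
+         * Inject pending interrupts before interrupts are disabled and
+         * softirqs are checked: a vcpu_kick() racing with this work raises
+         * VCPU_KICK_SOFTIRQ so that the softirq check below catches it.
+         */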
+        call svm_intr_assist
+
get_current(bx)
CLGI
jnz .Lsvm_process_softirqs
call svm_asid_handle_vmrun
-        call svm_intr_assist
cmpb $0,addr_of(tb_init_done)
jnz .Lsvm_trace
.globl vmx_asm_do_vmentry
vmx_asm_do_vmentry:
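+        /* Inject pending interrupts before the softirq check below. */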
+        call vmx_intr_assist
+
get_current(bx)
cli
cmpl $0,(r(dx),r(ax),1)
jnz .Lvmx_process_softirqs
-        call vmx_intr_assist
-
testb $0xff,VCPU_vmx_emul(r(bx))
jnz .Lvmx_goto_realmode
/*.Lvmx_resume:*/
VMRESUME
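+        /* VMRESUME failed: re-enable interrupts before the failure handler. */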
+        sti
call vm_resume_fail
ud2
.Lvmx_launch:
VMLAUNCH
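+        /* Likewise for a failed VMLAUNCH. */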
+        sti
call vm_launch_fail
ud2
#include <xen/shared.h>
-static inline void vcpu_kick(struct vcpu *v)
-{
-    /*
-     * NB1. 'pause_flags' and 'processor' must be checked /after/ update of
-     * pending flag. These values may fluctuate (after all, we hold no
-     * locks) but the key insight is that each change will cause
-     * evtchn_upcall_pending to be polled.
-     *
-     * NB2. We save the running flag across the unblock to avoid a needless
-     * IPI for domains that we IPI'd to unblock.
-     */
-    int running = v->is_running;
-    vcpu_unblock(v);
-    if ( running )
-        smp_send_event_check_cpu(v->processor);
-}
-
-static inline void vcpu_mark_events_pending(struct vcpu *v)
-{
-    int already_pending = test_and_set_bit(
-        0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending));
-
-    if ( already_pending )
-        return;
-
-    if ( is_hvm_vcpu(v) )
-        hvm_assert_evtchn_irq(v);
-    else
-        vcpu_kick(v);
-}
+void vcpu_kick(struct vcpu *v);
+void vcpu_mark_events_pending(struct vcpu *v);
int hvm_local_events_need_delivery(struct vcpu *v);
static inline int local_events_need_delivery(void)
#define NMI_MCE_SOFTIRQ (NR_COMMON_SOFTIRQS + 0)
#define TIME_CALIBRATE_SOFTIRQ (NR_COMMON_SOFTIRQS + 1)
+#define VCPU_KICK_SOFTIRQ (NR_COMMON_SOFTIRQS + 2)
-#define NR_ARCH_SOFTIRQS 2
+#define NR_ARCH_SOFTIRQS 3
#endif /* __ASM_SOFTIRQ_H__ */